Hello … I cannot figure out what I am doing wrong …
// Load the compiled Metal shader library bundled with the app.
// BUGFIX: pathForResource:ofType: expects a bare resource *name*, not a
// relative path — passing @"./default" makes the bundle lookup fail, so
// libPath comes back nil and newLibraryWithFile: reports "lib not found".
// (For a plain command-line tool there is no app bundle at all; in that
// case pass the file's path, e.g. @"default.metallib", directly.)
NSString *libPath = [[NSBundle mainBundle] pathForResource:@"default" ofType:@"metallib"];
id<MTLLibrary> library = [device newLibraryWithFile:libPath error:&error];
Same dir like the binary
default.metallib prog
To run with options like
prog --option   (note: a double hyphen, not an em dash — "—option" is not parsed as an option)
My prog always says "lib not found" when I run it …
Hard&Software :
Xcode 13.4.1 (13F100)
mbp M1 max
Post
Replies
Boosts
Views
Activity
my Mac's build fails with "Unsupported architecture"
Xcode: 13.4.1
included headers:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <memory.h>
/*
 * Architecture validation for current SDK
 *
 * Mirrors the guard in Apple's <sys/cdefs.h>: compilation aborts with
 * "Unsupported architecture" unless the compiler pre-defines one of the
 * four recognized architecture macros tested below.
 *
 * NOTE(review): there is no separate arm64e branch because arm64e
 * targets also pre-define __arm64__, so they fall into the __arm64__
 * case — an explicit __arm64e__ test is unnecessary (confirm against
 * `clang -arch arm64e -dM -E` output). If #error fires, the build is
 * targeting some other architecture entirely.
 */
#if !defined(__sys_cdefs_arch_unknown__) && defined(__i386__)
#elif !defined(__sys_cdefs_arch_unknown__) && defined(__x86_64__)
#elif !defined(__sys_cdefs_arch_unknown__) && defined(__arm__)
#elif !defined(__sys_cdefs_arch_unknown__) && defined(__arm64__)
#else
#error Unsupported architecture
#endif
where is arm64e ???
hello ... iv'e got a weird problem with this program
on macOS Ventura 13.1
on a MacBook Pro M1 MAX
and clang
prog:
#include <ctype.h>
#include <errno.h>
#include <math.h>
#include <memory.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <iconv.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#define CA_PRIVATE_IMPLEMENTATION
/*
 * coin -- draw `fex` distinct random uppercase letters (A-Z) without
 * replacement and return them as a string.
 *
 * BUGFIX: the original crashed with EXC_BAD_ACCESS because it wrote
 * through the NULL pointers `number` (via strncpy) and `picked`, and
 * mutated the const `charset` array via memmove — matching the
 * _platform_memmove crash shown in the lldb backtrace. It also never
 * NUL-terminated the result and never bounds-checked `fex`.
 *
 * Parameters:
 *   fex - number of letters to draw; must be in [0, 26].
 *
 * Returns a heap-allocated NUL-terminated string owned by the caller
 * (free() it when done), or NULL if fex is out of range or malloc fails.
 */
static const char *coin(int fex) {
    static const char alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    int pool_len = (int)(sizeof(alphabet) - 1);   /* 26 letters available */

    if (fex < 0 || fex > pool_len)
        return NULL;                               /* cannot draw that many distinct letters */

    /* Mutable working copy — the const source array must not be modified. */
    char pool[sizeof(alphabet)];
    memcpy(pool, alphabet, sizeof(alphabet));

    char *picked = malloc((size_t)fex + 1);        /* +1 for the NUL terminator */
    if (picked == NULL)
        return NULL;

    for (int i = 0; i < fex; i++) {
        int target = rand() % pool_len;            /* index of a still-unused letter */
        picked[i] = pool[target];
        /* Close the gap so the chosen letter cannot be drawn again. */
        memmove(&pool[target], &pool[target + 1], (size_t)(pool_len - target));
        pool_len--;
    }
    picked[fex] = '\0';
    return picked;
}
/*
 * Prints six 26-letter shuffles and one 5-letter draw, then reports the
 * elapsed wall-clock time.
 *
 * BUGFIX: the original passed coin()'s result straight to printf's %s —
 * undefined behavior when coin() returns NULL — and leaked the returned
 * buffer. It also printed a time_t with %ld without a cast, which is not
 * portable.
 */
int main(void) {
    time_t start, stop;
    start = time(NULL);
    for (int i = 0; i < 6; i++) {
        const char *draw = coin(26);
        printf("%s\n", draw ? draw : "(null)");   /* guard: %s with NULL is UB */
        free((void *)draw);                        /* coin() hands ownership to the caller */
    }
    const char *draw = coin(5);
    printf("%s\n", draw ? draw : "(null)");
    free((void *)draw);
    stop = time(NULL);
    /* time_t's width is unspecified — cast explicitly for %ld. */
    printf("Time elapsed : %ld seconds\n", (long)(stop - start));
    return 0;
}
hello
I've got a problem with C pthread on macOS Ventura 13.2.1
and some trouble with libxml2: instead of loading the XML parameters one at a time, the very first read returns all 4 parameters concatenated on one line.
I cannot figure out whether pthreads is the problem, or whether the xml file was read out of memory incorrectly … like:
punch_allloadthreads4
xml file
<?xml version="1.0" encoding="utf-8"?>
<um-configuration version="1.0">
<config>
<param1 name="punch_all"></param1>
<param2 name="load"></param2>
<param3 name="threads"></param3>
<param4 name="4"></param4>
</config>
</um-configuration>
well got autoupdates on
here is a lldb -f /usr/local/bin/enigma output ...
Process 4230 stopped
* thread #1, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x0)
frame #0: 0x00000001882cb700 libsystem_pthread.dylib`_pthread_create + 1084
libsystem_pthread.dylib`:
-> 0x1882cb700 <+1084>: str x25, [x22]
0x1882cb704 <+1088>: mov x0, x21
0x1882cb708 <+1092>: ldp x29, x30, [sp, #0x70]
0x1882cb70c <+1096>: ldp x20, x19, [sp, #0x60]
thread #2, stop reason = EXC_BAD_ACCESS (code=2, address=0x10000b26e)
frame #0: 0x00000001882f8090 libsystem_platform.dylib`_platform_memmove + 448
libsystem_platform.dylib`:
-> 0x1882f8090 <+448>: strb w6, [x3], #0x1
0x1882f8094 <+452>: subs x2, x2, #0x1
0x1882f8098 <+456>: b.ne 0x1882f808c ; <+444>
0x1882f809c <+460>: ret
Target 0: (enigma) stopped.
(lldb) bt
* thread #1, queue = 'com.apple.main-thread', stop reason = EXC_BAD_ACCESS (code=1, address=0x0)
* frame #0: 0x00000001882cb700 libsystem_pthread.dylib`_pthread_create + 1084
frame #1: 0x0000000100009f5c enigma`sbfParams + 1448
frame #2: 0x000000010000aa48 enigma`main + 1456
frame #3: 0x0000000187f9fe50 dyld`start + 2544
I cannot find the bug … also, running this code (Python) on torch device mps:0 is slower
than on cpu:0 or cpu:1 … but where is the bug? Or should it run on the Neural Engine together with cpu:1?
you need a setup like this:
#!/bin/bash
# One-shot setup: installs Homebrew, creates a conda env with PyTorch and
# the Python packages test.py needs, then runs test.py six times.

export HOMEBREW_BREW_GIT_REMOTE="https://github.com/Homebrew/brew" # put your Git mirror of Homebrew/brew here
export HOMEBREW_CORE_GIT_REMOTE="https://github.com/Homebrew/homebrew-core" # put your Git mirror of Homebrew/homebrew-core here
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
eval "$(/opt/homebrew/bin/brew shellenv)"
brew update --force --quiet
chmod -R go-w "$(brew --prefix)/share/zsh"

# Build flags for packages compiled from source (e.g. numpy against OpenBLAS).
export OPENBLAS=$(/opt/homebrew/bin/brew --prefix openblas)
export CFLAGS="-falign-functions=8 ${CFLAGS}"

brew install wget
brew install unzip

conda init --all
conda create -n torch-gpu python=3.10
conda activate torch-gpu
conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 -c pytorch
conda install -c conda-forge jupyter jupyterlab

python3 -m pip install --upgrade pip
python3 -m pip install insightface==0.2.1 onnx imageio scikit-learn scikit-image moviepy
python3 -m pip install googledrivedownloader
python3 -m pip install imageio==2.4.1
python3 -m pip install Cython
python3 -m pip install --no-use-pep517 numpy
python3 -m pip install torch
python3 -m pip install image
python3 -m pip install timm
# BUGFIX: was "PlL" — a typo; the imaging library is published on PyPI as Pillow.
python3 -m pip install Pillow
python3 -m pip install h5py

# Run the benchmark six times.
for i in $(seq 1 6); do
    python3 test.py
done

conda deactivate
exit 0
test.py:
# test.py — fits a cubic polynomial to y = sin(x) with manually-coded
# gradient descent (no autograd), used here to compare cpu vs mps timing.
# NOTE(review): printed losses depend on the exact floating-point
# expression forms below — do not reorder or refactor the math.
import torch
import math
# this ensures that the current MacOS version is at least 12.3+
print(torch.backends.mps.is_available())
# this ensures that the current PyTorch installation was built with MPS activated.
print(torch.backends.mps.is_built())
dtype = torch.float
# Select the benchmark device: uncomment exactly one of the lines below.
device = torch.device("cpu",0)
#device = torch.device("cpu",1)
#device = torch.device("mps",0)
# Create random input and output data: 2000 samples of sin(x) on [-pi, pi].
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)
# Randomly initialize weights (coefficients of a + b*x + c*x^2 + d*x^3).
a = torch.randn((), device=device, dtype=dtype)
b = torch.randn((), device=device, dtype=dtype)
c = torch.randn((), device=device, dtype=dtype)
d = torch.randn((), device=device, dtype=dtype)
learning_rate = 1e-6
for t in range(2000):
    # Forward pass: compute predicted y
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    # Compute and print loss (sum of squared errors, reported every 100 steps)
    loss = (y_pred - y).pow(2).sum().item()
    if t % 100 == 99:
        print(t, loss)
    # Backprop to compute gradients of a, b, c, d with respect to loss
    # (hand-derived: d(loss)/d(coef) = sum(2*(y_pred - y) * x**k))
    grad_y_pred = 2.0 * (y_pred - y)
    grad_a = grad_y_pred.sum()
    grad_b = (grad_y_pred * x).sum()
    grad_c = (grad_y_pred * x ** 2).sum()
    grad_d = (grad_y_pred * x ** 3).sum()
    # Update weights using gradient descent
    a -= learning_rate * grad_a
    b -= learning_rate * grad_b
    c -= learning_rate * grad_c
    d -= learning_rate * grad_d
print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')